New upstream version 2.0.53
authorPaul Gevers <elbrus@debian.org>
Sat, 15 Nov 2025 07:32:52 +0000 (08:32 +0100)
committerPaul Gevers <elbrus@debian.org>
Sat, 15 Nov 2025 07:32:52 +0000 (08:32 +0100)
20 files changed:
.github/workflows/ci.yml
debian/changelog
docker/Dockerfile
include/siri/version.h
itest/Dockerfile
itest/data_google_finance.py
itest/siridb-random-data.py
itest/tee_server.py
itest/test_compression.py
itest/test_group.py
itest/test_http_api.py
itest/testing/__init__.py
itest/testing/server.py
src/ctree/ctree.c
src/siri/db/aggregate.c
src/siri/db/points.c
src/siri/db/query.c
src/siri/db/series.c
src/siri/service/client.c
src/siri/service/request.c

index 4a0e77638edf74ed16a1803e44a0ec228dc74319..88600d69617d41a7a5f6b06d76cd8489fdea9071 100644 (file)
@@ -11,7 +11,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v5
       - name: Install libcleri
         run: |
           git clone https://github.com/cesbit/libcleri.git
@@ -21,7 +21,7 @@ jobs:
       - name: Install dependencies
         run: |
           sudo apt-get update -y
-          sudo apt-get install -y libuv1-dev libpcre2-dev libyajl-dev valgrind
+          sudo apt-get install -y uuid-dev libuv1-dev libpcre2-dev libyajl-dev valgrind
       - name: Run tests
         run: |
           cd ./Release/
index e1dc9dd60a7adf9b4c1a6b0ac29d6d15c066d998..2c7fb5ed99b27e676c721531d87d88a799847fe1 100644 (file)
@@ -1,3 +1,13 @@
+siridb-server (2.0.53-0~cb1) unstable; urgency=medium
+
+  * New upstream release
+    - Fixed bug with rollback after failed HTTP request for a new database
+    - Use heap instead of stack for translating a forward query
+    - Fixed realloc with zero bug
+    - Update testing files
+
+ -- Jeroen van der Heijden <jeroen@cesbit.com>  Wed, 22 Oct 2025 10:04:00 +0200
+
 siridb-server (2.0.52-0~cb1) unstable; urgency=medium
 
   * New upstream release
index ff7d6017e55fc4cf4969f8b8bc32ca2be8a71115..c2701d561e68a96cff25a09c98f6d5427529ede2 100644 (file)
@@ -1,4 +1,4 @@
-FROM amd64/alpine:3.13
+FROM amd64/alpine
 RUN apk update && \
     apk upgrade && \
     apk add gcc make libuv-dev musl-dev pcre2-dev yajl-dev util-linux-dev linux-headers git && \
@@ -11,7 +11,7 @@ RUN apk update && \
     make clean && \
     make
 
-FROM amd64/alpine:3.12.4
+FROM amd64/alpine
 RUN apk update && \
     apk add pcre2 libuv libuuid yajl && \
     mkdir -p /etc/siridb && \
index 07f5d401f822941aa6563f9e28ba9c080b576e30..d78b9822e8f4760d0e9839899e354bb45bbf1328 100644 (file)
@@ -6,7 +6,7 @@
 
 #define SIRIDB_VERSION_MAJOR 2
 #define SIRIDB_VERSION_MINOR 0
-#define SIRIDB_VERSION_PATCH 52
+#define SIRIDB_VERSION_PATCH 53
 
 /*
  * Use SIRIDB_VERSION_PRE_RELEASE for alpha release versions.
index c9ae84292c956438d6336d2664b6fba4b4b57011..d89db60ec70df21967b064f43d8d627880816a89 100644 (file)
@@ -1,4 +1,4 @@
-FROM ubuntu:18.04 AS builder
+FROM ubuntu:24.04 AS builder
 RUN apt-get update && \
     apt-get install -y \
         libcleri-dev \
@@ -13,9 +13,9 @@ COPY ./include/ ./include/
 COPY ./Release/ ./Release/
 RUN cd ./Release && \
     make clean && \
-    CFLAGS="-Werror -std=gnu89" make
+    CFLAGS="-Werror -std=gnu89" make all
 
-FROM python
+FROM python:3.12
 RUN apt-get update && \
     apt-get install -y \
         valgrind \
index 2427dedc1780ccf83d0d18b4de31ef8ad4470035..f73ba9a328aeaf11fc8c6ee08879b21e4fa29811 100755 (executable)
@@ -105,7 +105,7 @@ if __name__ == '__main__':
 
     args = parser.parse_args()
 
-    loop = asyncio.get_event_loop()
+    loop = asyncio.new_event_loop()
     loop.run_until_complete(print_google_finance_data(
         ticker=args.ticker,
         interval=args.interval,
index c53d53399ea3e062171442d6b8ec4044490e1b99..d9a5afad17bc01957838061658a4aa4ddf37132e 100755 (executable)
@@ -1,11 +1,9 @@
 #!/usr/bin/python3
-import os
 import sys
 import argparse
 import asyncio
 import time
 import logging
-import string
 import random
 import datetime
 import math
@@ -86,7 +84,7 @@ class Series:
         }[self.kind]
         self.lts = self._timestamp
 
-        factor = 10**r.randint(int(self.kind == int), 9)
+        factor = 10**r.randint(int(self.kind is int), 9)
         self.random_range = (
             int(r.random() * -factor),
             int(r.random() * factor) + 1)
@@ -96,13 +94,13 @@ class Series:
         self.likely_equal = r.choice([0.01, 0.1, 0.2, 0.5, 0.99])
         self.likely_change_sign = r.choice([0.0, 0.1, 0.25, 0.5, 0.9])
 
-        self.as_int = wrong_type and self.kind == float and r.random() > 0.9
+        self.as_int = wrong_type and self.kind is float and r.random() > 0.9
         self.likely_inf = r.random() * 0.2 \
-            if self.kind == float and r.random() > 0.95 else False
+            if self.kind is float and r.random() > 0.95 else False
         self.likely_nan = r.random() * 0.2 \
-            if self.kind == float and r.random() > 0.95 else False
+            if self.kind is float and r.random() > 0.95 else False
 
-        self.gen_float = wrong_type and self.kind == int and r.random() > 0.97
+        self.gen_float = wrong_type and self.kind is int and r.random() > 0.97
 
         self.name = self._gen_name()
         Series._series.append(self)
@@ -488,7 +486,7 @@ Home-page: https://github.com/cesbit/siridb-email-check
             [s.strip() for s in server.split(':')]
             for server in args.servers.split(',')])
 
-    loop = asyncio.get_event_loop()
+    loop = asyncio.new_event_loop()
     loop.run_until_complete(dump_data(siri, args))
 
     total_time = time.time() - start_time
index 77d95e3be7cc424aef613ef326dc73422cdfb423..98263b8804b35e27d9d7d5e4ac912cd26706deff 100755 (executable)
@@ -35,5 +35,5 @@ if __name__ == '__main__':
     parser.add_argument("-p", "--port", type=int, default=9104)
     args = parser.parse_args()
 
-    loop = asyncio.get_event_loop()
+    loop = asyncio.new_event_loop()
     loop.run_until_complete(main(args))
index 8e92db589c072feb4c74e2b5b0a73bf6faebca0a..25dc3689de2e1a9701152893eb5bc648c732b39f 100644 (file)
@@ -27,7 +27,7 @@ class TestCompression(TestBase):
     title = 'Test compression'
 
     GEN_POINTS = functools.partial(
-        gen_points, n=100, time_precision=TIME_PRECISION)
+        gen_points, time_precision=TIME_PRECISION)
 
     async def _test_series(self, client):
 
index 5cb5b44c0a562ac5454678a95bba77eb17679c31..2e35d1318c7c1ea6add2891a919fe5495d13fe0c 100644 (file)
@@ -170,6 +170,10 @@ class TestGroup(TestBase):
             await self.client0.query('count groups where series > 2'),
             {'groups': 2})
 
+        self.assertEqual(
+            await self.client0.query('select * from * before now'),
+            DATA)
+
         self.client0.close()
         self.client1.close()
 
index 37282103ce7b807a0b263cfe3af9948c8f47dc51..d801c1b4db1ba5fc34030682c70224fb1b79364b 100644 (file)
@@ -36,7 +36,7 @@ class TestHTTPAPI(TestBase):
         await self.client0.connect()
 
         x = requests.get(
-            f'http://localhost:9020/get-version', auth=('sa', 'siri'))
+            'http://localhost:9020/get-version', auth=('sa', 'siri'))
 
         self.assertEqual(x.status_code, 200)
         v = x.json()
@@ -44,7 +44,7 @@ class TestHTTPAPI(TestBase):
         self.assertTrue(isinstance(v[0], str))
 
         x = requests.post(
-            f'http://localhost:9020/insert/dbtest',
+            'http://localhost:9020/insert/dbtest',
             auth=('iris', 'siri'),
             headers={'Content-Type': 'application/json'})
 
@@ -62,7 +62,7 @@ class TestHTTPAPI(TestBase):
         }
 
         x = requests.post(
-            f'http://localhost:9020/insert/dbtest',
+            'http://localhost:9020/insert/dbtest',
             data=json.dumps(data),
             auth=('iris', 'siri'),
             headers={'Content-Type': 'application/json'}
@@ -81,7 +81,7 @@ class TestHTTPAPI(TestBase):
         }
 
         x = requests.post(
-            f'http://localhost:9021/new-pool',
+            'http://localhost:9021/new-pool',
             data=json.dumps(data),
             auth=('sa', 'siri'),
             headers={'Content-Type': 'application/json'})
@@ -94,7 +94,7 @@ class TestHTTPAPI(TestBase):
 
         data = {'data': [[1579521271, 10], [1579521573, 20]]}
         x = requests.post(
-            f'http://localhost:9020/insert/dbtest',
+            'http://localhost:9020/insert/dbtest',
             json=data,
             auth=('iris', 'siri'))
 
@@ -103,7 +103,7 @@ class TestHTTPAPI(TestBase):
             'success_msg': 'Successfully inserted 2 point(s).'})
 
         x = requests.post(
-            f'http://localhost:9020/query/dbtest',
+            'http://localhost:9020/query/dbtest',
             json={'q': 'select * from "data"'},
             auth=('iris', 'siri'))
 
@@ -111,7 +111,7 @@ class TestHTTPAPI(TestBase):
         self.assertEqual(x.json(), data)
 
         x = requests.post(
-            f'http://localhost:9020/query/dbtest',
+            'http://localhost:9020/query/dbtest',
             json={'q': 'select * from "data"', 't': 'ms'},
             auth=('iris', 'siri'))
 
@@ -123,7 +123,7 @@ class TestHTTPAPI(TestBase):
         self.assertEqual(x.json(), data)
 
         x = requests.post(
-            f'http://localhost:9020/query/dbtest',
+            'http://localhost:9020/query/dbtest',
             data=qpack.packb({
                 'q': 'select sum(1579600000) from "data"',
                 't': 'ms'}),
@@ -136,7 +136,7 @@ class TestHTTPAPI(TestBase):
             {'data': [[1579600000000, 30]]})
 
         x = requests.post(
-            f'http://localhost:9021/new-account',
+            'http://localhost:9021/new-account',
             json={'account': 't', 'password': ''},
             auth=('sa', 'siri'))
 
@@ -146,7 +146,7 @@ class TestHTTPAPI(TestBase):
                 'service account name should have at least 2 characters'})
 
         x = requests.post(
-            f'http://localhost:9021/new-account',
+            'http://localhost:9021/new-account',
             json={'account': 'tt', 'password': 'pass'},
             auth=('sa', 'siri'))
 
@@ -163,19 +163,19 @@ class TestHTTPAPI(TestBase):
 
         auth = ('tt', 'pass')
         x = requests.post(
-            f'http://localhost:9021/new-replica', json=data, auth=auth)
+            'http://localhost:9021/new-replica', json=data, auth=auth)
 
         self.assertEqual(x.status_code, 400)
         self.assertEqual(x.json(), {
             'error_msg': "database name already exists: 'dbtest'"})
 
         x = requests.post(
-            f'http://localhost:9022/new-replica', json=data, auth=auth)
+            'http://localhost:9022/new-replica', json=data, auth=auth)
         self.assertEqual(x.status_code, 401)
 
         auth = ('sa', 'siri')
         x = requests.post(
-            f'http://localhost:9022/new-replica', json=data, auth=auth)
+            'http://localhost:9022/new-replica', json=data, auth=auth)
 
         self.assertEqual(x.status_code, 400)
         self.assertEqual(x.json(), {
@@ -185,7 +185,7 @@ class TestHTTPAPI(TestBase):
 
         data['port'] = 9000
         x = requests.post(
-            f'http://localhost:9022/new-replica', json=data, auth=auth)
+            'http://localhost:9022/new-replica', json=data, auth=auth)
         self.assertEqual(x.status_code, 200)
         self.assertEqual(x.json(), 'OK')
 
@@ -193,7 +193,7 @@ class TestHTTPAPI(TestBase):
         await self.assertIsRunning(self.db, self.client0, timeout=50)
 
         x = requests.get(
-            f'http://localhost:9022/get-databases', auth=auth)
+            'http://localhost:9022/get-databases', auth=auth)
         self.assertEqual(x.status_code, 200)
         self.assertEqual(x.json(), ['dbtest'])
 
index 9ce56c322f355ba52bc75134257a83a0f77ebe4f..186b634e6dd838a91e70f744b7bb1fb294727507 100644 (file)
@@ -40,6 +40,6 @@ async def _run_test(test, loglevel):
 
 def run_test(test, loglevel='CRITICAL'):
     assert isinstance(test, TestBase)
-    loop = asyncio.get_event_loop()
+    loop = asyncio.new_event_loop()
     cleanup()
     loop.run_until_complete(_run_test(test, loglevel))
index 63ebae5468af42ee906f962bf542d856e9c6c99d..94ca7980014d8514f3cd146d1f43260ca39806ec 100644 (file)
@@ -136,15 +136,16 @@ class Server:
                         ' -H' if self.HOLD_TERM else ''),
                 shell=True)
         elif self.TERMINAL == 'xterm':
-            self.proc = subprocess.Popen(
-               'xterm {}-title {} -geometry {} -e "{}{} --config {}"'
-               .format('-hold ' if self.HOLD_TERM else '',
-                       self.name,
-                       self.GEOMETRY,
-                       VALGRIND if self.MEM_CHECK else '',
-                       SIRIDBC.format(BUILDTYPE=self.BUILDTYPE),
-                       self.cfgfile),
-               shell=True)
+            self.proc = subprocess.Popen((
+                    'xterm {}-title {} -geometry {} -e "{}{} --config {} '
+                    '--log-colorized"'
+                ).format('-hold ' if self.HOLD_TERM else '',
+                self.name,
+                self.GEOMETRY,
+                VALGRIND if self.MEM_CHECK else '',
+                SIRIDBC.format(BUILDTYPE=self.BUILDTYPE),
+                self.cfgfile),
+                shell=True)
         elif self.TERMINAL is None:
             errfn = f'testdir/{self.test_title}-{self.name}-err.log'
             outfn = f'testdir/{self.test_title}-{self.name}-out.log'
index 0cf8ec5ff39727fcbd8fe5f07aa85d699f87bfdd..6d876498559271692f91895c5264843b5279c68f 100644 (file)
@@ -826,22 +826,31 @@ static int CT_node_resize(ct_node_t * node, uint8_t pos)
         uint8_t diff = node->offset - pos;
         uint8_t oldn = node->n;
         node->n += diff;
-        tmp = (ct_nodes_t *) realloc(
-                node->nodes,
-                node->n * sizeof(ct_nodes_t));
-        if (tmp == NULL && node->n)
+        if (node->n == 0)
         {
-            node->n -= diff;
-            rc = -1;
+            free(node->nodes);
+            node->nodes = NULL;
+            node->offset = pos;
         }
         else
         {
-            node->nodes = tmp;
-            node->offset = pos;
-            memmove(node->nodes + diff,
+            tmp = (ct_nodes_t *) realloc(
                     node->nodes,
-                    oldn * sizeof(ct_nodes_t));
-            memset(node->nodes, 0, diff * sizeof(ct_nodes_t));
+                    node->n * sizeof(ct_nodes_t));
+            if (tmp == NULL)
+            {
+                node->n -= diff;
+                rc = -1;
+            }
+            else
+            {
+                node->nodes = tmp;
+                node->offset = pos;
+                memmove(node->nodes + diff,
+                        node->nodes,
+                        oldn * sizeof(ct_nodes_t));
+                memset(node->nodes, 0, diff * sizeof(ct_nodes_t));
+            }
         }
     }
     else if (pos >= node->offset + node->n)
index d56fa4d2f173fb761897d0ca376543a03b5d9601..4f39071dd59eae908fc22a08ea7b9a3bedd8a208 100644 (file)
@@ -1039,17 +1039,25 @@ static siridb_points_t * AGGREGATE_filter(
 
         if (source->len > points->len)
         {
-            dpt = (siridb_point_t *) realloc(
-                    points->data,
-                    points->len * sizeof(siridb_point_t));
-            if (dpt == NULL && points->len)
+            if (points->len == 0)
             {
-                /* not critical */
-                log_error("Error while re-allocating memory for points");
+                free(points->data);
+                points->data = NULL;
             }
             else
             {
-                points->data = dpt;
+                dpt = (siridb_point_t *) realloc(
+                        points->data,
+                        points->len * sizeof(siridb_point_t));
+                if (dpt == NULL)
+                {
+                    /* not critical */
+                    log_error("Error while re-allocating memory for points");
+                }
+                else
+                {
+                    points->data = dpt;
+                }
             }
         }
     }
@@ -1214,15 +1222,23 @@ static siridb_points_t * AGGREGATE_group_by(
     if (points->len < max_sz)
     {
         /* shrink points allocation */
-        point = realloc(points->data, points->len * sizeof(siridb_point_t));
-        if (point == NULL && points->len)
+        if (points->len == 0)
         {
-            /* not critical */
-            log_error("Re-allocation points failed.");
+            free(points->data);
+            points->data = NULL;
         }
         else
         {
-            points->data = point;
+            point = realloc(points->data, points->len * sizeof(siridb_point_t));
+            if (point == NULL)
+            {
+                /* not critical */
+                log_error("Re-allocation points failed.");
+            }
+            else
+            {
+                points->data = point;
+            }
         }
     }
     /* else { assert (points->len == max_sz); } */
index d0759f555c4f5dfa1d581c18cf2d664224085ffa..7a882e9aeb5d6f8ebe4e2bd0d348889ce8d93aea 100644 (file)
@@ -92,9 +92,17 @@ siridb_points_t * siridb_points_new(size_t size, points_tp tp)
  */
 int siridb_points_resize(siridb_points_t * points, size_t n)
 {
+    siridb_point_t * tmp;
     assert( points->len <= n );
-    siridb_point_t * tmp = realloc(points->data, sizeof(siridb_point_t) * n);
-    if (tmp == NULL && n)
+    if (n == 0)
+    {
+        free(points->data);
+        points->data = NULL;
+        return 0;
+    }
+
+    tmp = realloc(points->data, sizeof(siridb_point_t) * n);
+    if (tmp == NULL)
     {
         return -1;
     }
index 4ba392edd4342b2af2c926e46bb582cff4f6fb58..fccf58a1d42ddcfc8558799da8e10c13671a89e6 100644 (file)
@@ -31,7 +31,6 @@
 
 
 #define QUERY_TOO_LONG -1
-#define QUERY_MAX_LENGTH 8192
 #define QUERY_EXTRA_ALLOC_SIZE 200
 #define SIRIDB_FWD_SERVERS_TIMEOUT 5000  /* 5 seconds  */
 
@@ -679,11 +678,11 @@ static int QUERY_to_packer(qp_packer_t * packer, siridb_query_t * query)
     if (query->flags & SIRIDB_QUERY_FLAG_REBUILD)
     {
         /* reserve 200 extra chars */
-        char buffer[packer->alloc_size];
+        char * buffer = malloc(packer->alloc_size);
         size_t size = packer->alloc_size;
         siridb_t * siridb = query->siridb;
 
-        rc = QUERY_rebuild(
+        rc = (buffer == NULL) ? -1 : QUERY_rebuild(
                 siridb,
                 cleri_gn(query->pr->tree->children),
                 buffer,
@@ -704,6 +703,7 @@ static int QUERY_to_packer(qp_packer_t * packer, siridb_query_t * query)
                     (const unsigned char *) buffer,
                     packer->alloc_size - size);
         }
+        free(buffer);
     }
     else
     {
index ff83275eb3df871749359ff1ff9f650eb714baa4..333df8b649523f858bce7c247d456e6e05175287 100644 (file)
@@ -628,17 +628,26 @@ void siridb_series_remove_shard(
         else
         {
             series->idx_len -= offset;
-            idx = (idx_t *) realloc(
-                        series->idx,
-                        series->idx_len * sizeof(idx_t));
-            if (idx == NULL && series->idx_len)
+            if (series->idx_len == 0)
             {
-                log_error("Re-allocation failed while removing series from "
-                        "shard index");
+                free(series->idx);
+                series->idx = NULL;
             }
             else
             {
-                series->idx = idx;
+                idx = (idx_t *) realloc(
+                            series->idx,
+                            series->idx_len * sizeof(idx_t));
+                if (idx == NULL)
+                {
+                    log_error(
+                        "Re-allocation failed while removing series from "
+                        "shard index");
+                }
+                else
+                {
+                    series->idx = idx;
+                }
             }
             if (series->start >= start && series->start < end)
             {
@@ -1403,19 +1412,27 @@ int siridb_series_optimize_shard(
         }
 
         /* shrink memory to the new size */
-        idx = (idx_t *) realloc(
-                series->idx,
-                series->idx_len * sizeof(idx_t));
-        if (idx == NULL && series->idx_len)
+        if (series->idx_len == 0)
         {
-            /* this is not critical since the original allocated block still
-             * works.
-             */
-            log_error("Shrinking memory for one series has failed!");
+            free(series->idx);
+            series->idx = NULL;
         }
         else
         {
-            series->idx = idx;
+            idx = (idx_t *) realloc(
+                    series->idx,
+                    series->idx_len * sizeof(idx_t));
+            if (idx == NULL)
+            {
+                /* this is not critical since the original allocated block still
+                * works.
+                */
+                log_error("Shrinking memory for one series has failed!");
+            }
+            else
+            {
+                series->idx = idx;
+            }
         }
     }
     else
index b35baa089c883dd26bd833d89b882e34dd92e615..22c5583ceff6df018ae0b05bc32e4a3db6c9329c 100644 (file)
@@ -334,12 +334,10 @@ static void CLIENT_err(
         ...)
 {
     char err_msg[SIRI_MAX_SIZE_ERR_MSG];
-
     va_list args;
     va_start(args, fmt);
     vsnprintf(err_msg, SIRI_MAX_SIZE_ERR_MSG, fmt, args);
     va_end(args);
-
     sirinet_pkg_t * package = sirinet_pkg_err(
             adm_client->pid,
             strlen(err_msg),
@@ -359,7 +357,6 @@ static void CLIENT_err(
     }
 
     sirinet_stream_decref(siri.client);
-
     uv_close((uv_handle_t *) &siri.timer, NULL);
 }
 
index bc0fc2396e5ddd57f56db1f7b48d7c5e789aae5a..1405cc1eb06dcdc45bb9556da6650467c021ff85 100644 (file)
@@ -1001,14 +1001,19 @@ static cproto_server_t SERVICE_on_get_databases(
 void siri_service_request_rollback(const char * dbpath)
 {
     size_t dbpath_len = strlen(dbpath);
-    char dbfn[dbpath_len + max_filename_sz];
-
+    char * dbfn = malloc(dbpath_len + max_filename_sz + 1);
+    if (dbfn == NULL)
+    {
+        log_error("Roll-back creating new database has failed.");
+        return;
+    }
     sprintf(dbfn, "%s%s", dbpath, DB_CONF_FN);
     unlink(dbfn);
     sprintf(dbfn, "%s%s", dbpath, DB_DAT_FN);
     unlink(dbfn);
     sprintf(dbfn, "%s%s", dbpath, REINDEX_FN);
     unlink(dbfn);
+    free(dbfn);
     if (rmdir(dbpath))
     {
         log_error("Roll-back creating new database has failed.");